With TensorFlow 2.0, it is recommended that for multi-GPU training you use tf.distribute.MirroredStrategy() instead of the Keras multi_gpu_model utility. You can find more here: https://www.tensorflow.org/guide/distributed_training
In [ ]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Generate a sample sine wave; x has shape (1, 1000)
x = np.array([np.linspace(0, 100, 1000)])
y = np.sin(x)
# Build a training set of 1000 identical copies of the wave
X = np.empty([1000, 1000])
Y = np.empty([1000, 1000])
for i in range(1000):
    X[i] = x
    Y[i] = y
# Plot the sample sine wave
plt.plot(Y[0])
plt.show()
# MirroredStrategy replicates the model onto each local GPU and splits every batch across them
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
    # Define a model to fit the above data; tanh on the output layer covers the
    # [-1, 1] range of the sine wave (a sigmoid cannot produce negative values)
    model = tf.keras.Sequential([
        tf.keras.layers.Dropout(rate=0.2, input_shape=x.shape[1:]),
        tf.keras.layers.Dense(units=64, activation='sigmoid'),
        tf.keras.layers.Dropout(rate=0.2),
        tf.keras.layers.Dense(units=1000, activation='tanh')
    ])
    # Compile inside the strategy scope; accuracy is not meaningful for
    # regression, so track mean absolute error instead
    model.compile(loss='mse',
                  optimizer='adam',
                  metrics=['mae'])
# Fit the model; with 1000 samples and batch_size=1024, each epoch runs as a single batch
model.fit(X, Y, epochs=2000, batch_size=1024)
# Get the prediction
y_hat = model.predict(X)
# Plot the results
plt.plot(y_hat[0])
plt.show()
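When scaling out to more GPUs, a common pattern is to keep the per-replica batch size fixed and grow the global batch size with the replica count, since MirroredStrategy splits each batch evenly across replicas. The sketch below illustrates that pattern; num_replicas_in_sync and tf.config.list_physical_devices are standard TF 2.x APIs, but the per-replica batch size of 256 is an arbitrary value chosen for illustration.
In [ ]:
import tensorflow as tf

# List the GPUs that MirroredStrategy will pick up by default
print("GPUs visible:", tf.config.list_physical_devices('GPU'))

strategy = tf.distribute.MirroredStrategy()

# Keep the per-replica workload constant and scale the global batch
# size by the number of replicas in sync (256 is an arbitrary example)
per_replica_batch = 256
global_batch = per_replica_batch * strategy.num_replicas_in_sync
print("Replicas in sync:", strategy.num_replicas_in_sync,
      "-> global batch size:", global_batch)

# The model above could then be trained with:
# model.fit(X, Y, epochs=2000, batch_size=global_batch)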